In [1]:
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from termcolor import colored

from PIL import Image
from cv2 import resize
import cv2

In [2]:
train = pd.read_csv('/home/mckc/image class//train.csv')
test = pd.read_csv('/home/mckc/image class//test.csv')

print 'the training data shape is ',train.shape
print 'the test data shape is ', test.shape
train_records = train.shape[0]
test_records = test.shape[0]
    
X_tr = np.zeros((1,3,224,224),dtype=np.uint8)
Y_tr =[]
iteration = 0
for i in train.values[:,0]:
    image = resize(np.array(Image.open(i)),(224,224)).astype(np.float32)
    image[:,:,0] -= 103.939
    image[:,:,1] -= 116.779
    image[:,:,2] -= 123.68
    image = image.transpose((2,0,1))
    X_tr =  np.vstack((X_tr,np.expand_dims(image, axis=0)))
    Y_tr = np.append(Y_tr,train.values[iteration,1])
    iteration+=1
    if iteration % 50==0:
        print colored((float(iteration)/train_records*100 ,' Percentage complete'), 'green')
    

X_tr = X_tr[1:,:,:]
                
iteration = 0      
X_ts = np.zeros((1,3,224,224),dtype=np.uint8)
Y_ts = []
for i in test.values[:,0]:
    image = resize(np.array(Image.open(i)),(224,224)).astype(np.float32)
    image[:,:,0] -= 103.939
    image[:,:,1] -= 116.779
    image[:,:,2] -= 123.68
    image = image.transpose((2,0,1))
    X_ts =  np.vstack((X_ts,np.expand_dims(image, axis=0)))
    Y_ts = np.append(Y_ts,test.values[iteration,1])
    iteration+=1
    if iteration % 50==0:
        print colored((float(iteration)/test_records*100 ,' Percentage complete'), 'green')
    
X_ts = X_ts[1:,:,:]
print 'the training file shape',X_tr.shape,Y_tr.shape
print 'the testing file shape',X_ts.shape,Y_ts.shape


the training data shape is  (113, 2)
the test data shape is  (38, 2)
(44.24778761061947, ' Percentage complete')
(88.49557522123894, ' Percentage complete')
the training file shape (113, 3, 224, 224) (113,)
the testing file shape (38, 3, 224, 224) (38,)

In [3]:
def simulate(X,Y):
    import scipy as sp
    import scipy.ndimage
    complete = np.zeros((1,3,224,224),dtype=np.uint8)
    Y_complete = []
    for i in range(len(X)):
        complete = np.vstack((complete,X[i,:,:,:].reshape(-1,3,224,224)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:,:], angle = 5,reshape=False,cval=1).reshape(-1,3,224,224)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:,:], angle = 10,reshape=False,cval=1).reshape(-1,3,224,224)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:,:], angle = 15,reshape=False,cval=1).reshape(-1,3,224,224)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:,:], angle = -5,reshape=False,cval=1).reshape(-1,3,224,224)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:,:], angle = -15,reshape=False,cval=1).reshape(-1,3,224,224)))
        complete = np.vstack((complete,scipy.ndimage.rotate(X[i,:,:,:], angle = -10,reshape=False,cval=1).reshape(-1,3,224,224)))
        rotated = np.fliplr(X[i,:,:,:])
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = 5,reshape=False,cval=1).reshape(-1,3,224,224)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = 10,reshape=False,cval=1).reshape(-1,3,224,224)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = 15,reshape=False,cval=1).reshape(-1,3,224,224)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = -5,reshape=False,cval=1).reshape(-1,3,224,224)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = -10,reshape=False,cval=1).reshape(-1,3,224,224)))
        complete = np.vstack((complete,scipy.ndimage.rotate(rotated, angle = -15,reshape=False,cval=1).reshape(-1,3,224,224)))
        complete = np.vstack((complete,rotated.reshape(-1,3,224,224)))
        Y_complete = np.append(Y_complete,([Y[i]]*14))
        if i % 10==0:
            print colored((float(i)/len(X)*100 ,' Percentage complete'),'green')
    complete = complete[1:,:,:]
    return complete,Y_complete

In [4]:
import time
start_time = time.clock()
X,Y = simulate(X_tr,Y_tr)
print X.shape,Y.shape
print time.clock() - start_time, "seconds"


(0.0, ' Percentage complete')
(8.849557522123893, ' Percentage complete')
(17.699115044247787, ' Percentage complete')
(26.548672566371685, ' Percentage complete')
(35.39823008849557, ' Percentage complete')
(44.24778761061947, ' Percentage complete')
(53.09734513274337, ' Percentage complete')
(61.94690265486725, ' Percentage complete')
(70.79646017699115, ' Percentage complete')
(79.64601769911505, ' Percentage complete')
(88.49557522123894, ' Percentage complete')
(97.34513274336283, ' Percentage complete')
(1582, 3, 224, 224) (1582,)
117.947793 seconds

In [5]:
#from keras.datasets import cifar10
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
#(X_train, y_train), (X_test, y_test) = cifar10.load_data()
def standard(X):
    return (X - X.mean())/X.max()

#X_ts = standard(X_ts)
#X= standard(X)

map, Y_number = np.unique(Y, return_inverse=True)
Y_test_number = np.unique(Y_ts, return_inverse=True)[1]

Y_train = np_utils.to_categorical(Y_number, 2)
Y_test = np_utils.to_categorical(Y_test_number, 2)

datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
    )

# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(X_tr.astype(np.float32))


Using Theano backend.
Using gpu device 0: Quadro M2000M (CNMeM is enabled with initial size: 95.0% of memory, cuDNN 5005)

In [6]:
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD,RMSprop,Adadelta,Adagrad

def VGG_16(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)
        

    return model

model = VGG_16('/home/mckc/Downloads/vgg16_weights.h5')
lables = np.load('/home/mckc/labels.npy')
#model = VGG_16('/home/mckc/Face_code/face.h5')

model.layers.pop()
model.add(Dense(2, activation='softmax'))

adagrad = Adagrad(lr=0.001, epsilon=1e-08)
model.compile(optimizer=adagrad, loss='categorical_crossentropy',metrics=['accuracy'])

im = cv2.resize(cv2.imread('/home/mckc/cat.jpg'), (224, 224)).astype(np.float32) im[:,:,0] -= 103.939 im[:,:,1] -= 116.779 im[:,:,2] -= 123.68 im = im.transpose((2,0,1)) im = np.expand_dims(im, axis=0) out = model.predict(im) print np.argmax(out) im = im[0,:,:,:].transpose(1,2,0) im[:,:,0] += 103.939 im[:,:,1] += 116.779 im[:,:,2] += 123.68 plt.imshow(im.astype(np.uint8)) print lables[np.argmax(out)]

im = cv2.resize(cv2.imread('/home/mckc/dog.jpg'), (224, 224)).astype(np.float32) im[:,:,0] -= 103.939 im[:,:,1] -= 116.779 im[:,:,2] -= 123.68 im = im.transpose((2,0,1)) im = np.expand_dims(im, axis=0) out = model.predict(im) print np.argmax(out) im = im[0,:,:,:].transpose(1,2,0) im[:,:,0] += 103.939 im[:,:,1] += 116.779 im[:,:,2] += 123.68 plt.imshow(im.astype(np.uint8)) print lables[np.argmax(out)]


In [7]:
model.fit(X,Y_train,verbose=1,validation_data=(X_ts,Y_test),nb_epoch=100,batch_size=3)


Train on 1582 samples, validate on 38 samples
Epoch 1/100
1582/1582 [==============================] - 271s - loss: 0.6829 - acc: 0.6795 - val_loss: 0.6775 - val_acc: 0.6842
Epoch 2/100
1582/1582 [==============================] - 271s - loss: 0.6747 - acc: 0.6814 - val_loss: 0.6716 - val_acc: 0.6842
Epoch 3/100
1582/1582 [==============================] - 271s - loss: 0.6699 - acc: 0.6814 - val_loss: 0.6675 - val_acc: 0.6842
Epoch 4/100
1582/1582 [==============================] - 271s - loss: 0.6664 - acc: 0.6814 - val_loss: 0.6644 - val_acc: 0.6842
Epoch 5/100
1582/1582 [==============================] - 271s - loss: 0.6636 - acc: 0.6814 - val_loss: 0.6617 - val_acc: 0.6842
Epoch 6/100
1582/1582 [==============================] - 271s - loss: 0.6612 - acc: 0.6814 - val_loss: 0.6595 - val_acc: 0.6842
Epoch 7/100
1582/1582 [==============================] - 271s - loss: 0.6592 - acc: 0.6814 - val_loss: 0.6576 - val_acc: 0.6842
Epoch 8/100
1582/1582 [==============================] - 271s - loss: 0.6574 - acc: 0.6814 - val_loss: 0.6559 - val_acc: 0.6842
Epoch 9/100
1582/1582 [==============================] - 271s - loss: 0.6558 - acc: 0.6814 - val_loss: 0.6544 - val_acc: 0.6842
Epoch 10/100
1582/1582 [==============================] - 271s - loss: 0.6544 - acc: 0.6814 - val_loss: 0.6530 - val_acc: 0.6842
Epoch 11/100
1582/1582 [==============================] - 271s - loss: 0.6532 - acc: 0.6814 - val_loss: 0.6518 - val_acc: 0.6842
Epoch 12/100
1582/1582 [==============================] - 271s - loss: 0.6520 - acc: 0.6814 - val_loss: 0.6506 - val_acc: 0.6842
Epoch 13/100
1582/1582 [==============================] - 271s - loss: 0.6509 - acc: 0.6814 - val_loss: 0.6495 - val_acc: 0.6842
Epoch 14/100
1582/1582 [==============================] - 271s - loss: 0.6499 - acc: 0.6814 - val_loss: 0.6486 - val_acc: 0.6842
Epoch 15/100
1582/1582 [==============================] - 271s - loss: 0.6490 - acc: 0.6814 - val_loss: 0.6477 - val_acc: 0.6842
Epoch 16/100
1582/1582 [==============================] - 271s - loss: 0.6481 - acc: 0.6814 - val_loss: 0.6468 - val_acc: 0.6842
Epoch 17/100
1582/1582 [==============================] - 271s - loss: 0.6473 - acc: 0.6814 - val_loss: 0.6460 - val_acc: 0.6842
Epoch 18/100
1582/1582 [==============================] - 271s - loss: 0.6466 - acc: 0.6814 - val_loss: 0.6453 - val_acc: 0.6842
Epoch 19/100
1582/1582 [==============================] - 271s - loss: 0.6459 - acc: 0.6814 - val_loss: 0.6446 - val_acc: 0.6842
Epoch 20/100
1582/1582 [==============================] - 271s - loss: 0.6452 - acc: 0.6814 - val_loss: 0.6439 - val_acc: 0.6842
Epoch 21/100
1582/1582 [==============================] - 271s - loss: 0.6446 - acc: 0.6814 - val_loss: 0.6433 - val_acc: 0.6842
Epoch 22/100
1582/1582 [==============================] - 271s - loss: 0.6440 - acc: 0.6814 - val_loss: 0.6427 - val_acc: 0.6842
Epoch 23/100
1582/1582 [==============================] - 271s - loss: 0.6434 - acc: 0.6814 - val_loss: 0.6421 - val_acc: 0.6842
Epoch 24/100
1582/1582 [==============================] - 271s - loss: 0.6429 - acc: 0.6814 - val_loss: 0.6416 - val_acc: 0.6842
Epoch 25/100
1582/1582 [==============================] - 271s - loss: 0.6424 - acc: 0.6814 - val_loss: 0.6411 - val_acc: 0.6842
Epoch 26/100
1582/1582 [==============================] - 271s - loss: 0.6419 - acc: 0.6814 - val_loss: 0.6406 - val_acc: 0.6842
Epoch 27/100
1582/1582 [==============================] - 271s - loss: 0.6414 - acc: 0.6814 - val_loss: 0.6401 - val_acc: 0.6842
Epoch 28/100
1582/1582 [==============================] - 271s - loss: 0.6410 - acc: 0.6814 - val_loss: 0.6397 - val_acc: 0.6842
Epoch 29/100
1582/1582 [==============================] - 271s - loss: 0.6406 - acc: 0.6814 - val_loss: 0.6393 - val_acc: 0.6842
Epoch 30/100
1582/1582 [==============================] - 271s - loss: 0.6402 - acc: 0.6814 - val_loss: 0.6389 - val_acc: 0.6842
Epoch 31/100
1582/1582 [==============================] - 271s - loss: 0.6398 - acc: 0.6814 - val_loss: 0.6385 - val_acc: 0.6842
Epoch 32/100
1582/1582 [==============================] - 271s - loss: 0.6394 - acc: 0.6814 - val_loss: 0.6381 - val_acc: 0.6842
Epoch 33/100
1582/1582 [==============================] - 271s - loss: 0.6391 - acc: 0.6814 - val_loss: 0.6377 - val_acc: 0.6842
Epoch 34/100
1582/1582 [==============================] - 271s - loss: 0.6387 - acc: 0.6814 - val_loss: 0.6374 - val_acc: 0.6842
Epoch 35/100
1582/1582 [==============================] - 271s - loss: 0.6384 - acc: 0.6814 - val_loss: 0.6371 - val_acc: 0.6842
Epoch 36/100
1582/1582 [==============================] - 271s - loss: 0.6381 - acc: 0.6814 - val_loss: 0.6367 - val_acc: 0.6842
Epoch 37/100
1582/1582 [==============================] - 271s - loss: 0.6378 - acc: 0.6814 - val_loss: 0.6364 - val_acc: 0.6842
Epoch 38/100
1582/1582 [==============================] - 271s - loss: 0.6375 - acc: 0.6814 - val_loss: 0.6361 - val_acc: 0.6842
Epoch 39/100
1582/1582 [==============================] - 271s - loss: 0.6372 - acc: 0.6814 - val_loss: 0.6359 - val_acc: 0.6842
Epoch 40/100
1582/1582 [==============================] - 271s - loss: 0.6370 - acc: 0.6814 - val_loss: 0.6356 - val_acc: 0.6842
Epoch 41/100
1582/1582 [==============================] - 271s - loss: 0.6367 - acc: 0.6814 - val_loss: 0.6353 - val_acc: 0.6842
Epoch 42/100
1582/1582 [==============================] - 271s - loss: 0.6365 - acc: 0.6814 - val_loss: 0.6351 - val_acc: 0.6842
Epoch 43/100
1582/1582 [==============================] - 271s - loss: 0.6362 - acc: 0.6814 - val_loss: 0.6348 - val_acc: 0.6842
Epoch 44/100
1582/1582 [==============================] - 271s - loss: 0.6360 - acc: 0.6814 - val_loss: 0.6346 - val_acc: 0.6842
Epoch 45/100
1582/1582 [==============================] - 271s - loss: 0.6358 - acc: 0.6814 - val_loss: 0.6344 - val_acc: 0.6842
Epoch 46/100
1582/1582 [==============================] - 271s - loss: 0.6355 - acc: 0.6814 - val_loss: 0.6341 - val_acc: 0.6842
Epoch 47/100
1582/1582 [==============================] - 271s - loss: 0.6353 - acc: 0.6814 - val_loss: 0.6339 - val_acc: 0.6842
Epoch 48/100
1582/1582 [==============================] - 271s - loss: 0.6351 - acc: 0.6814 - val_loss: 0.6337 - val_acc: 0.6842
Epoch 49/100
1582/1582 [==============================] - 271s - loss: 0.6349 - acc: 0.6814 - val_loss: 0.6335 - val_acc: 0.6842
Epoch 50/100
1582/1582 [==============================] - 271s - loss: 0.6347 - acc: 0.6814 - val_loss: 0.6333 - val_acc: 0.6842
Epoch 51/100
1582/1582 [==============================] - 271s - loss: 0.6346 - acc: 0.6814 - val_loss: 0.6331 - val_acc: 0.6842
Epoch 52/100
1582/1582 [==============================] - 271s - loss: 0.6344 - acc: 0.6814 - val_loss: 0.6329 - val_acc: 0.6842
Epoch 53/100
1582/1582 [==============================] - 271s - loss: 0.6342 - acc: 0.6814 - val_loss: 0.6327 - val_acc: 0.6842
Epoch 54/100
1582/1582 [==============================] - 271s - loss: 0.6340 - acc: 0.6814 - val_loss: 0.6326 - val_acc: 0.6842
Epoch 55/100
1582/1582 [==============================] - 271s - loss: 0.6339 - acc: 0.6814 - val_loss: 0.6324 - val_acc: 0.6842
Epoch 56/100
1582/1582 [==============================] - 271s - loss: 0.6337 - acc: 0.6814 - val_loss: 0.6322 - val_acc: 0.6842
Epoch 57/100
1582/1582 [==============================] - 271s - loss: 0.6336 - acc: 0.6814 - val_loss: 0.6321 - val_acc: 0.6842
Epoch 58/100
1582/1582 [==============================] - 271s - loss: 0.6334 - acc: 0.6814 - val_loss: 0.6319 - val_acc: 0.6842
Epoch 59/100
1582/1582 [==============================] - 271s - loss: 0.6333 - acc: 0.6814 - val_loss: 0.6318 - val_acc: 0.6842
Epoch 60/100
1582/1582 [==============================] - 271s - loss: 0.6331 - acc: 0.6814 - val_loss: 0.6316 - val_acc: 0.6842
Epoch 61/100
1582/1582 [==============================] - 271s - loss: 0.6330 - acc: 0.6814 - val_loss: 0.6315 - val_acc: 0.6842
Epoch 62/100
1582/1582 [==============================] - 271s - loss: 0.6328 - acc: 0.6814 - val_loss: 0.6313 - val_acc: 0.6842
Epoch 63/100
1582/1582 [==============================] - 271s - loss: 0.6327 - acc: 0.6814 - val_loss: 0.6312 - val_acc: 0.6842
Epoch 64/100
1582/1582 [==============================] - 271s - loss: 0.6326 - acc: 0.6814 - val_loss: 0.6311 - val_acc: 0.6842
Epoch 65/100
1582/1582 [==============================] - 271s - loss: 0.6324 - acc: 0.6814 - val_loss: 0.6309 - val_acc: 0.6842
Epoch 66/100
1582/1582 [==============================] - 271s - loss: 0.6323 - acc: 0.6814 - val_loss: 0.6308 - val_acc: 0.6842
Epoch 67/100
1582/1582 [==============================] - 271s - loss: 0.6322 - acc: 0.6814 - val_loss: 0.6307 - val_acc: 0.6842
Epoch 68/100
1582/1582 [==============================] - 271s - loss: 0.6321 - acc: 0.6814 - val_loss: 0.6306 - val_acc: 0.6842
Epoch 69/100
1582/1582 [==============================] - 271s - loss: 0.6320 - acc: 0.6814 - val_loss: 0.6305 - val_acc: 0.6842
Epoch 70/100
1582/1582 [==============================] - 271s - loss: 0.6319 - acc: 0.6814 - val_loss: 0.6303 - val_acc: 0.6842
Epoch 71/100
1582/1582 [==============================] - 271s - loss: 0.6318 - acc: 0.6814 - val_loss: 0.6302 - val_acc: 0.6842
Epoch 72/100
1582/1582 [==============================] - 271s - loss: 0.6317 - acc: 0.6814 - val_loss: 0.6301 - val_acc: 0.6842
Epoch 73/100
1582/1582 [==============================] - 271s - loss: 0.6316 - acc: 0.6814 - val_loss: 0.6300 - val_acc: 0.6842
Epoch 74/100
1582/1582 [==============================] - 271s - loss: 0.6315 - acc: 0.6814 - val_loss: 0.6299 - val_acc: 0.6842
Epoch 75/100
1582/1582 [==============================] - 271s - loss: 0.6314 - acc: 0.6814 - val_loss: 0.6298 - val_acc: 0.6842
Epoch 76/100
1582/1582 [==============================] - 271s - loss: 0.6313 - acc: 0.6814 - val_loss: 0.6297 - val_acc: 0.6842
Epoch 77/100
1582/1582 [==============================] - 271s - loss: 0.6312 - acc: 0.6814 - val_loss: 0.6296 - val_acc: 0.6842
Epoch 78/100
1582/1582 [==============================] - 271s - loss: 0.6311 - acc: 0.6814 - val_loss: 0.6295 - val_acc: 0.6842
Epoch 79/100
1582/1582 [==============================] - 271s - loss: 0.6310 - acc: 0.6814 - val_loss: 0.6294 - val_acc: 0.6842
Epoch 80/100
1582/1582 [==============================] - 271s - loss: 0.6309 - acc: 0.6814 - val_loss: 0.6293 - val_acc: 0.6842
Epoch 81/100
1582/1582 [==============================] - 271s - loss: 0.6308 - acc: 0.6814 - val_loss: 0.6293 - val_acc: 0.6842
Epoch 82/100
1582/1582 [==============================] - 271s - loss: 0.6307 - acc: 0.6814 - val_loss: 0.6292 - val_acc: 0.6842
Epoch 83/100
1582/1582 [==============================] - 271s - loss: 0.6307 - acc: 0.6814 - val_loss: 0.6291 - val_acc: 0.6842
Epoch 84/100
1582/1582 [==============================] - 271s - loss: 0.6306 - acc: 0.6814 - val_loss: 0.6290 - val_acc: 0.6842
Epoch 85/100
1582/1582 [==============================] - 271s - loss: 0.6305 - acc: 0.6814 - val_loss: 0.6289 - val_acc: 0.6842
Epoch 86/100
1582/1582 [==============================] - 271s - loss: 0.6304 - acc: 0.6814 - val_loss: 0.6288 - val_acc: 0.6842
Epoch 87/100
1582/1582 [==============================] - 271s - loss: 0.6304 - acc: 0.6814 - val_loss: 0.6288 - val_acc: 0.6842
Epoch 88/100
1582/1582 [==============================] - 271s - loss: 0.6303 - acc: 0.6814 - val_loss: 0.6287 - val_acc: 0.6842
Epoch 89/100
1582/1582 [==============================] - 271s - loss: 0.6302 - acc: 0.6814 - val_loss: 0.6286 - val_acc: 0.6842
Epoch 90/100
1582/1582 [==============================] - 271s - loss: 0.6302 - acc: 0.6814 - val_loss: 0.6285 - val_acc: 0.6842
Epoch 91/100
1582/1582 [==============================] - 271s - loss: 0.6301 - acc: 0.6814 - val_loss: 0.6285 - val_acc: 0.6842
Epoch 92/100
1582/1582 [==============================] - 271s - loss: 0.6300 - acc: 0.6814 - val_loss: 0.6284 - val_acc: 0.6842
Epoch 93/100
1582/1582 [==============================] - 271s - loss: 0.6300 - acc: 0.6814 - val_loss: 0.6283 - val_acc: 0.6842
Epoch 94/100
1582/1582 [==============================] - 271s - loss: 0.6299 - acc: 0.6814 - val_loss: 0.6283 - val_acc: 0.6842
Epoch 95/100
1582/1582 [==============================] - 271s - loss: 0.6298 - acc: 0.6814 - val_loss: 0.6282 - val_acc: 0.6842
Epoch 96/100
1582/1582 [==============================] - 271s - loss: 0.6298 - acc: 0.6814 - val_loss: 0.6281 - val_acc: 0.6842
Epoch 97/100
1582/1582 [==============================] - 271s - loss: 0.6297 - acc: 0.6814 - val_loss: 0.6281 - val_acc: 0.6842
Epoch 98/100
1582/1582 [==============================] - 271s - loss: 0.6296 - acc: 0.6814 - val_loss: 0.6280 - val_acc: 0.6842
Epoch 99/100
1582/1582 [==============================] - 271s - loss: 0.6296 - acc: 0.6814 - val_loss: 0.6280 - val_acc: 0.6842
Epoch 100/100
1582/1582 [==============================] - 271s - loss: 0.6295 - acc: 0.6814 - val_loss: 0.6279 - val_acc: 0.6842
Out[7]:
<keras.callbacks.History at 0x7f55cb08cfd0>

In [2]:
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD,RMSprop,Adadelta,Adagrad

def VGG_16(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model

#model = VGG_16('/home/mckc/Downloads/vgg16_weights.h5')
model = VGG_16('/home/mckc/Face_code/face.h5')

#model.layers.pop()
#model.add(Dense(2, activation='softmax'))

adagrad = Adagrad(lr=0.001, epsilon=1e-08)
model.compile(optimizer=adagrad, loss='categorical_crossentropy',metrics=['accuracy'])


Using Theano backend.
Using gpu device 0: Quadro M2000M (CNMeM is enabled with initial size: 95.0% of memory, cuDNN 5005)

In [3]:
im = cv2.resize(cv2.imread('/home/mckc/Downloads/vgg_face_caffe/ak.png'), (224, 224)).astype(np.float32)
im[:,:,0] -= 103.939
im[:,:,1] -= 116.779
im[:,:,2] -= 123.68
im = im.transpose((2,0,1))
im = np.expand_dims(im, axis=0)
out = model.predict(im)
print np.argmax(out)
labels = np.loadtxt('/home/mckc/Downloads/vgg_face_caffe/names.txt', str, delimiter='\t')

im = im[0,:,:,:].transpose(1,2,0)
im[:,:,0] += 103.939
im[:,:,1] += 116.779
im[:,:,2] += 123.68
plt.imshow(im.astype(np.uint8))
print labels[np.argmax(out)]


396
Clark_Duke

In [10]:
model.fit(X,Y_train,verbose=1,validation_data=(X_ts,Y_test),nb_epoch=150,batch_size=5)


Train on 1582 samples, validate on 38 samples
Epoch 1/150
1582/1582 [==============================] - 249s - loss: 0.6307 - acc: 0.6814 - val_loss: 0.6292 - val_acc: 0.6842
Epoch 2/150
1582/1582 [==============================] - 249s - loss: 0.6307 - acc: 0.6814 - val_loss: 0.6291 - val_acc: 0.6842
Epoch 3/150
1582/1582 [==============================] - 249s - loss: 0.6306 - acc: 0.6814 - val_loss: 0.6290 - val_acc: 0.6842
Epoch 4/150
1582/1582 [==============================] - 249s - loss: 0.6305 - acc: 0.6814 - val_loss: 0.6290 - val_acc: 0.6842
Epoch 5/150
1582/1582 [==============================] - 249s - loss: 0.6305 - acc: 0.6814 - val_loss: 0.6289 - val_acc: 0.6842
Epoch 6/150
1582/1582 [==============================] - 249s - loss: 0.6304 - acc: 0.6814 - val_loss: 0.6288 - val_acc: 0.6842
Epoch 7/150
1582/1582 [==============================] - 249s - loss: 0.6303 - acc: 0.6814 - val_loss: 0.6287 - val_acc: 0.6842
Epoch 8/150
1582/1582 [==============================] - 249s - loss: 0.6303 - acc: 0.6814 - val_loss: 0.6287 - val_acc: 0.6842
Epoch 9/150
1582/1582 [==============================] - 249s - loss: 0.6302 - acc: 0.6814 - val_loss: 0.6286 - val_acc: 0.6842
Epoch 10/150
1582/1582 [==============================] - 248s - loss: 0.6301 - acc: 0.6814 - val_loss: 0.6285 - val_acc: 0.6842
Epoch 11/150
1582/1582 [==============================] - 249s - loss: 0.6301 - acc: 0.6814 - val_loss: 0.6285 - val_acc: 0.6842
Epoch 12/150
1582/1582 [==============================] - 249s - loss: 0.6300 - acc: 0.6814 - val_loss: 0.6284 - val_acc: 0.6842
Epoch 13/150
1582/1582 [==============================] - 249s - loss: 0.6300 - acc: 0.6814 - val_loss: 0.6283 - val_acc: 0.6842
Epoch 14/150
1582/1582 [==============================] - 249s - loss: 0.6299 - acc: 0.6814 - val_loss: 0.6283 - val_acc: 0.6842
Epoch 15/150
1582/1582 [==============================] - 249s - loss: 0.6298 - acc: 0.6814 - val_loss: 0.6282 - val_acc: 0.6842
Epoch 16/150
1582/1582 [==============================] - 249s - loss: 0.6298 - acc: 0.6814 - val_loss: 0.6282 - val_acc: 0.6842
Epoch 17/150
1582/1582 [==============================] - 249s - loss: 0.6297 - acc: 0.6814 - val_loss: 0.6281 - val_acc: 0.6842
Epoch 18/150
1582/1582 [==============================] - 249s - loss: 0.6297 - acc: 0.6814 - val_loss: 0.6280 - val_acc: 0.6842
Epoch 19/150
1582/1582 [==============================] - 249s - loss: 0.6296 - acc: 0.6814 - val_loss: 0.6280 - val_acc: 0.6842
Epoch 20/150
1582/1582 [==============================] - 249s - loss: 0.6296 - acc: 0.6814 - val_loss: 0.6279 - val_acc: 0.6842
Epoch 21/150
1582/1582 [==============================] - 249s - loss: 0.6295 - acc: 0.6814 - val_loss: 0.6279 - val_acc: 0.6842
Epoch 22/150
1582/1582 [==============================] - 249s - loss: 0.6295 - acc: 0.6814 - val_loss: 0.6278 - val_acc: 0.6842
Epoch 23/150
1582/1582 [==============================] - 249s - loss: 0.6294 - acc: 0.6814 - val_loss: 0.6278 - val_acc: 0.6842
Epoch 24/150
1582/1582 [==============================] - 249s - loss: 0.6294 - acc: 0.6814 - val_loss: 0.6277 - val_acc: 0.6842
Epoch 25/150
1582/1582 [==============================] - 249s - loss: 0.6293 - acc: 0.6814 - val_loss: 0.6277 - val_acc: 0.6842
Epoch 26/150
1582/1582 [==============================] - 249s - loss: 0.6293 - acc: 0.6814 - val_loss: 0.6276 - val_acc: 0.6842
Epoch 27/150
1582/1582 [==============================] - 249s - loss: 0.6292 - acc: 0.6814 - val_loss: 0.6276 - val_acc: 0.6842
Epoch 28/150
1582/1582 [==============================] - 249s - loss: 0.6292 - acc: 0.6814 - val_loss: 0.6275 - val_acc: 0.6842
Epoch 29/150
1582/1582 [==============================] - 249s - loss: 0.6291 - acc: 0.6814 - val_loss: 0.6275 - val_acc: 0.6842
Epoch 30/150
1582/1582 [==============================] - 249s - loss: 0.6291 - acc: 0.6814 - val_loss: 0.6274 - val_acc: 0.6842
Epoch 31/150
1582/1582 [==============================] - 249s - loss: 0.6291 - acc: 0.6814 - val_loss: 0.6274 - val_acc: 0.6842
Epoch 32/150
1582/1582 [==============================] - 249s - loss: 0.6290 - acc: 0.6814 - val_loss: 0.6273 - val_acc: 0.6842
Epoch 33/150
 890/1582 [===============>..............] - ETA: 108s - loss: 0.6299 - acc: 0.6798
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-10-3ffef7e05f64> in <module>()
----> 1 model.fit(X,Y_train,verbose=1,validation_data=(X_ts,Y_test),nb_epoch=150,batch_size=5)

/home/mckc/anaconda/lib/python2.7/site-packages/keras/models.pyc in fit(self, x, y, batch_size, nb_epoch, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, **kwargs)
    618                               shuffle=shuffle,
    619                               class_weight=class_weight,
--> 620                               sample_weight=sample_weight)
    621 
    622     def evaluate(self, x, y, batch_size=32, verbose=1,

/home/mckc/anaconda/lib/python2.7/site-packages/keras/engine/training.pyc in fit(self, x, y, batch_size, nb_epoch, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight)
   1102                               verbose=verbose, callbacks=callbacks,
   1103                               val_f=val_f, val_ins=val_ins, shuffle=shuffle,
-> 1104                               callback_metrics=callback_metrics)
   1105 
   1106     def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):

/home/mckc/anaconda/lib/python2.7/site-packages/keras/engine/training.pyc in _fit_loop(self, f, ins, out_labels, batch_size, nb_epoch, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics)
    820                 batch_logs['size'] = len(batch_ids)
    821                 callbacks.on_batch_begin(batch_index, batch_logs)
--> 822                 outs = f(ins_batch)
    823                 if type(outs) != list:
    824                     outs = [outs]

/home/mckc/anaconda/lib/python2.7/site-packages/keras/backend/theano_backend.pyc in __call__(self, inputs)
    670     def __call__(self, inputs):
    671         assert type(inputs) in {list, tuple}
--> 672         return self.function(*inputs)
    673 
    674 

/home/mckc/Downloads/Theano-master/theano/compile/function_module.pyc in __call__(self, *args, **kwargs)
    864         try:
    865             outputs =\
--> 866                 self.fn() if output_subset is None else\
    867                 self.fn(output_subset=output_subset)
    868         except Exception:

KeyboardInterrupt: 

In [ ]:
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(X, Y_train, batch_size=2),validation_data=(X_ts.astype(np.float16),Y_test)
                    ,nb_worker=7,samples_per_epoch=len(X), nb_epoch=1000, pickle_safe=True)


Epoch 1/1000
 262/1582 [===>..........................] - ETA: 304s - loss: 0.6727 - acc: 0.6603

In [4]:
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
import cv2, numpy as np

def VGG_19(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model


model = VGG_19('/home/mckc/Downloads/vgg19_weights.h5')

model.layers.pop()
model.add(Dense(2, activation='softmax'))

sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')

In [5]:
model.fit(X_tr.astype(np.float16),Y_train,verbose=1,validation_data=(X_ts.astype(np.float16),Y_test),nb_epoch=5000,batch_size=5)


Train on 121 samples, validate on 40 samples
Epoch 1/5000
121/121 [==============================] - 24s - loss: 0.6877 - val_loss: 0.6791
Epoch 2/5000
121/121 [==============================] - 24s - loss: 0.6702 - val_loss: 0.6597
Epoch 3/5000
121/121 [==============================] - 24s - loss: 0.6536 - val_loss: 0.6429
Epoch 4/5000
121/121 [==============================] - 25s - loss: 0.6404 - val_loss: 0.6324
Epoch 5/5000
121/121 [==============================] - 25s - loss: 0.6307 - val_loss: 0.6240
Epoch 6/5000
121/121 [==============================] - 25s - loss: 0.6245 - val_loss: 0.6186
Epoch 7/5000
121/121 [==============================] - 25s - loss: 0.6214 - val_loss: 0.6168
Epoch 8/5000
121/121 [==============================] - 25s - loss: 0.6200 - val_loss: 0.6155
Epoch 9/5000
121/121 [==============================] - 25s - loss: 0.6180 - val_loss: 0.6135
Epoch 10/5000
121/121 [==============================] - 25s - loss: 0.6118 - val_loss: 0.6123
Epoch 11/5000
121/121 [==============================] - 25s - loss: 0.6193 - val_loss: 0.6113
Epoch 12/5000
121/121 [==============================] - 25s - loss: 0.6161 - val_loss: 0.6113
Epoch 13/5000
121/121 [==============================] - 25s - loss: 0.6167 - val_loss: 0.6111
Epoch 14/5000
121/121 [==============================] - 25s - loss: 0.6164 - val_loss: 0.6113
Epoch 15/5000
121/121 [==============================] - 25s - loss: 0.6160 - val_loss: 0.6112
Epoch 16/5000
121/121 [==============================] - 25s - loss: 0.6163 - val_loss: 0.6114
Epoch 17/5000
121/121 [==============================] - 25s - loss: 0.6161 - val_loss: 0.6113
Epoch 18/5000
 45/121 [==========>...................] - ETA: 14s - loss: 0.5851
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-5-832cdf4bd8ef> in <module>()
----> 1 model.fit(X_tr.astype(np.float16),Y_train,verbose=1,validation_data=(X_ts.astype(np.float16),Y_test),nb_epoch=5000,batch_size=5)

/home/mckc/anaconda/lib/python2.7/site-packages/keras/models.pyc in fit(self, x, y, batch_size, nb_epoch, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, **kwargs)
    618                               shuffle=shuffle,
    619                               class_weight=class_weight,
--> 620                               sample_weight=sample_weight)
    621 
    622     def evaluate(self, x, y, batch_size=32, verbose=1,

/home/mckc/anaconda/lib/python2.7/site-packages/keras/engine/training.pyc in fit(self, x, y, batch_size, nb_epoch, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight)
   1102                               verbose=verbose, callbacks=callbacks,
   1103                               val_f=val_f, val_ins=val_ins, shuffle=shuffle,
-> 1104                               callback_metrics=callback_metrics)
   1105 
   1106     def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):

/home/mckc/anaconda/lib/python2.7/site-packages/keras/engine/training.pyc in _fit_loop(self, f, ins, out_labels, batch_size, nb_epoch, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics)
    820                 batch_logs['size'] = len(batch_ids)
    821                 callbacks.on_batch_begin(batch_index, batch_logs)
--> 822                 outs = f(ins_batch)
    823                 if type(outs) != list:
    824                     outs = [outs]

/home/mckc/anaconda/lib/python2.7/site-packages/keras/backend/theano_backend.pyc in __call__(self, inputs)
    670     def __call__(self, inputs):
    671         assert type(inputs) in {list, tuple}
--> 672         return self.function(*inputs)
    673 
    674 

/home/mckc/anaconda/lib/python2.7/site-packages/Theano-0.9.0.dev2-py2.7.egg/theano/compile/function_module.pyc in __call__(self, *args, **kwargs)
    864         try:
    865             outputs =\
--> 866                 self.fn() if output_subset is None else\
    867                 self.fn(output_subset=output_subset)
    868         except Exception:

KeyboardInterrupt: 

In [ ]:
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(X_tr, Y_train, batch_size=10),validation_data=(X_ts.astype(np.float16),Y_test)
                    ,nb_worker=7,samples_per_epoch=len(X_tr), nb_epoch=5000, pickle_safe=True)

In [1]:
from keras.optimizers import SGD
from keras.models import Model
from keras.layers import Dense, Input, Activation
from convnetskeras. convnets import convnet

alexnet = convnet('alexnet', weights_path='/home/mckc/Downloads/alexnet_weights.h5')

input = alexnet.input
img_representation = alexnet.get_layer("dense_2").output

classifier = Dense(7,name='classifier')(img_representation)
classifier = Activation("softmax", name="softmax")(classifier)
model = Model(input=input,output=classifier)
sgd = SGD(lr=.001, decay=1.e-6, momentum=0.9, nesterov=False)
model.compile(optimizer=sgd, loss='categorical_crossentropy',metrics=["accuracy"])


Using Theano backend.
Using gpu device 0: Quadro M2000M (CNMeM is enabled with initial size: 95.0% of memory, cuDNN 5005)

In [16]:
model = Alexnet()


Start training with Alexnet CNN :

In [17]:
model.load_weights('/home/mckc/Downloads/alexnet_weights.h5')


---------------------------------------------------------------------------
Exception                                 Traceback (most recent call last)
<ipython-input-17-c52466916381> in <module>()
----> 1 model.load_weights('/home/mckc/Downloads/alexnet_weights.h5')

/home/mckc/anaconda/lib/python2.7/site-packages/keras/engine/topology.pyc in load_weights(self, filepath)
   2487         if 'layer_names' not in f.attrs and 'model_weights' in f:
   2488             f = f['model_weights']
-> 2489         self.load_weights_from_hdf5_group(f)
   2490         if hasattr(f, 'close'):
   2491             f.close()

/home/mckc/anaconda/lib/python2.7/site-packages/keras/engine/topology.pyc in load_weights_from_hdf5_group(self, f)
   2538                                 'containing ' + str(len(layer_names)) +
   2539                                 ' layers into a model with ' +
-> 2540                                 str(len(flattened_layers)) + ' layers.')
   2541 
   2542             # we batch weight value assignments in a single backend call

Exception: You are trying to load a weight file containing 11 layers into a model with 8 layers.

In [ ]: